# convenience, private and static:
@staticmethod
def
_sigmoid(theta):
return
1/(1+np.exp(-theta))
@staticmethod
def
_add_intercept(X):
return
np.hstack((np.ones((X.shape[0],1)),X))
# add bias term
# public:
def predict_proba(self, X, add_intercept=True):
    """Return P(y=1 | x) for each row of X.

    When add_intercept is True, a bias column of ones is prepended first.
    """
    if add_intercept:
        design = self._add_intercept(X)  # prepend bias column
    else:
        design = X
    # push the linear scores through the sigmoid link
    return self._sigmoid(design @ self.w_)
def predict(self, X):
    """Return hard class labels: True wherever P(y=1 | x) exceeds 0.5."""
    probabilities = self.predict_proba(X)
    # threshold the probabilities to get the actual prediction
    return probabilities > 0.5
class BinaryLogisticRegression(BinaryLogisticRegressionBase):
    """Logistic regression trained by fixed-step gradient ascent.

    The gradient is accumulated sample-by-sample (the vectorized variant
    lives in a subclass).
    """

    # private:
    def __str__(self):
        """Human-readable summary; shows coefficients once fitted."""
        if not hasattr(self, 'w_'):
            return 'Untrained Binary Logistic Regression Object'
        # we have trained weights, so display them
        return f'Binary Logistic Regression Object with coefficients:\n{self.w_}'

    def _get_gradient(self, X, y):
        """Average log-likelihood gradient: (1/n) * sum_i (y_i - g(x_i)) x_i."""
        total = np.zeros(self.w_.shape)  # accumulator over samples
        for row, target in zip(X, y):
            # per-sample contribution to the sum
            contrib = (target - self.predict_proba(row, add_intercept=False)) * row
            # reshape to a column vector before accumulating
            total += contrib.reshape(self.w_.shape)
        return total / float(len(y))

    # public:
    def fit(self, X, y):
        """Fit self.w_ by gradient ascent for self.iters iterations."""
        Xb = self._add_intercept(X)  # add bias term
        _, num_features = Xb.shape
        self.w_ = np.zeros((num_features, 1))  # start from all-zero weights
        # take self.iters fixed-size steps up the gradient
        for _ in range(self.iters):
            step = self._get_gradient(Xb, y)
            self.w_ += step * self.eta  # scale by learning rate
class
VectorBinaryLogisticRegression(BinaryLogisticRegression):
# inherit from our previous class to get same functionality
@staticmethod
def
_sigmoid(theta):
# increase stability, redefine sigmoid operation
return
expit(theta)
#1/(1+np.exp(-theta))
# but overwrite the gradient calculation
def
_get_gradient(self,X,y):
ydiff = y-self.predict_proba(X,add_intercept=False).ravel()
# get y difference